Generating Association Rules
# Mine association rules from `transactions` with the Apriori algorithm.
# supp - Minimum frequency of (itemset in transactions)
# conf - Minimum confidence of rule (X -> Y)
# conf = 0 keeps every rule that meets the support threshold, so the rule set
# can be filtered by confidence later without re-running the miner.
rules <- apriori(transactions, parameter = list(supp = 0.01, conf = 0))
## Apriori
##
## Parameter specification:
## confidence minval smax arem aval originalSupport maxtime support minlen
## 0 0.1 1 none FALSE TRUE 5 0.01 1
## maxlen target ext
## 10 rules TRUE
##
## Algorithmic control:
## filter tree heap memopt load sort verbose
## 0.1 TRUE TRUE FALSE TRUE 2 TRUE
##
## Absolute minimum support count: 100
##
## set item appearances ...[0 item(s)] done [0.00s].
## set transactions ...[98 item(s), 10000 transaction(s)] done [0.00s].
## sorting and recoding items ... [89 item(s)] done [0.00s].
## creating transaction tree ... done [0.00s].
## checking subsets of size 1 2 3 4 5 done [0.01s].
## writing ... [11524 rule(s)] done [0.00s].
## creating S4 object ... done [0.00s].
# Total number of rules mined (matches the "writing ... [11524 rule(s)]" line above).
length(rules)
## [1] 11524
# Do sets (1, 2) -> 3 and (2, 3) -> 1 have different confidence values?
# Confidence formula examples:
# conf = P(3 | (1, 2)) = P(1, 2, 3) / P(1, 2)
# conf = P(1 | (2, 3)) = P(1, 2, 3) / P(2, 3)
# Answer: Yes, they are two different rules (same support, different divisor).
# Keep only rules whose confidence is at least 0.5.
rules_high_conf <- subset(rules, confidence >= 0.5)
length(rules_high_conf)
## [1] 1165
Visualizing Rules
Support vs Confidence
# Scatter plot of all rules: support (x) vs confidence (y), shaded by lift.
# Title fixed for consistency: was "Support and Confidence", which did not
# match the section header or the "Support vs Lift" plot below.
plot(rules,
     engine = "ggplot2",
     measure = c("support", "confidence"),
     shading = "lift",
     main = "Support vs Confidence") +
  # Replace arulesViz's default colour scale with a white -> red ramp;
  # lift is clamped to [0, 10] and out-of-range values are drawn in blue.
  scale_color_gradientn(
    colors = colorRampPalette(c("white", "red"))(20),
    limits = c(0, 10),
    na.value = "blue"
  ) +
  labs(x = "Support", y = "Confidence", color = "Lift") +
  theme_minimal()
## To reduce overplotting, jitter is added! Use jitter = 0 to prevent jitter.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.

Support vs Lift
# Scatter plot of all rules: support (x) vs lift (y), shaded by confidence.
plot(rules,
engine = "ggplot2",
measure = c("support", "lift"),
shading = "confidence",
main = "Support vs Lift") +
# White -> red colour ramp for confidence; NA values are drawn in blue.
# NOTE(review): unlike the support/confidence plot, no limits are set here,
# so the ramp spans the observed confidence range — confirm this is intended.
scale_color_gradientn(
colors = colorRampPalette(c("white", "red"))(20),
na.value = "blue"
) +
labs(x = "Support", y = "Lift", color = "Confidence") +
theme_minimal()
## To reduce overplotting, jitter is added! Use jitter = 0 to prevent jitter.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.

Rules with High Support
# Keep rules whose support is at least 10% of transactions.
# The transcript below shows this set is dominated by empty-LHS
# (single-item frequency) rules, since conf = 0 was used when mining.
rules_10sup <- subset(rules, support >= 0.1)
inspect(rules_10sup)
## lhs rhs support confidence coverage lift count
## [1] {} => {item66} 0.1006 0.1006000 1.0000 1.0000000 1006
## [2] {} => {item15} 0.1041 0.1041000 1.0000 1.0000000 1041
## [3] {} => {item34} 0.1111 0.1111000 1.0000 1.0000000 1111
## [4] {} => {item24} 0.1104 0.1104000 1.0000 1.0000000 1104
## [5] {} => {item62} 0.1163 0.1163000 1.0000 1.0000000 1163
## [6] {} => {item25} 0.1254 0.1254000 1.0000 1.0000000 1254
## [7] {} => {item7} 0.1245 0.1245000 1.0000 1.0000000 1245
## [8] {} => {item61} 0.1291 0.1291000 1.0000 1.0000000 1291
## [9] {} => {item96} 0.1269 0.1269000 1.0000 1.0000000 1269
## [10] {} => {item74} 0.1416 0.1416000 1.0000 1.0000000 1416
## [11] {} => {item41} 0.1382 0.1382000 1.0000 1.0000000 1382
## [12] {} => {item9} 0.1412 0.1412000 1.0000 1.0000000 1412
## [13] {} => {item28} 0.1422 0.1422000 1.0000 1.0000000 1422
## [14] {} => {item50} 0.1397 0.1397000 1.0000 1.0000000 1397
## [15] {} => {item53} 0.1459 0.1459000 1.0000 1.0000000 1459
## [16] {} => {item95} 0.1502 0.1502000 1.0000 1.0000000 1502
## [17] {} => {item99} 0.1568 0.1568000 1.0000 1.0000000 1568
## [18] {} => {item91} 0.1569 0.1569000 1.0000 1.0000000 1569
## [19] {} => {item1} 0.1718 0.1718000 1.0000 1.0000000 1718
## [20] {} => {item31} 0.1727 0.1727000 1.0000 1.0000000 1727
## [21] {} => {item77} 0.1737 0.1737000 1.0000 1.0000000 1737
## [22] {} => {item35} 0.1742 0.1742000 1.0000 1.0000000 1742
## [23] {} => {item8} 0.1764 0.1764000 1.0000 1.0000000 1764
## [24] {} => {item75} 0.1802 0.1802000 1.0000 1.0000000 1802
## [25] {} => {item20} 0.1845 0.1845000 1.0000 1.0000000 1845
## [26] {} => {item76} 0.1889 0.1889000 1.0000 1.0000000 1889
## [27] {} => {item69} 0.1907 0.1907000 1.0000 1.0000000 1907
## [28] {} => {item37} 0.1969 0.1969000 1.0000 1.0000000 1969
## [29] {} => {item16} 0.2050 0.2050000 1.0000 1.0000000 2050
## [30] {} => {item3} 0.2133 0.2133000 1.0000 1.0000000 2133
## [31] {} => {item42} 0.2291 0.2291000 1.0000 1.0000000 2291
## [32] {} => {item84} 0.2365 0.2365000 1.0000 1.0000000 2365
## [33] {} => {item92} 0.2630 0.2630000 1.0000 1.0000000 2630
## [34] {} => {item21} 0.2769 0.2769000 1.0000 1.0000000 2769
## [35] {} => {item58} 0.2831 0.2831000 1.0000 1.0000000 2831
## [36] {} => {item10} 0.3035 0.3035000 1.0000 1.0000000 3035
## [37] {} => {item30} 0.3308 0.3308000 1.0000 1.0000000 3308
## [38] {} => {item5} 0.3699 0.3699000 1.0000 1.0000000 3699
## [39] {} => {item13} 0.4948 0.4948000 1.0000 1.0000000 4948
## [40] {item20} => {item13} 0.1034 0.5604336 0.1845 1.1326467 1034
## [41] {item13} => {item20} 0.1034 0.2089733 0.4948 1.1326467 1034
## [42] {item37} => {item13} 0.1104 0.5606907 0.1969 1.1331663 1104
## [43] {item13} => {item37} 0.1104 0.2231205 0.4948 1.1331663 1104
## [44] {item16} => {item13} 0.1017 0.4960976 0.2050 1.0026224 1017
## [45] {item13} => {item16} 0.1017 0.2055376 0.4948 1.0026224 1017
## [46] {item3} => {item13} 0.1164 0.5457103 0.2133 1.1028906 1164
## [47] {item13} => {item3} 0.1164 0.2352466 0.4948 1.1028906 1164
## [48] {item42} => {item13} 0.1200 0.5237887 0.2291 1.0585868 1200
## [49] {item13} => {item42} 0.1200 0.2425222 0.4948 1.0585868 1200
## [50] {item84} => {item13} 0.1239 0.5238901 0.2365 1.0587916 1239
## [51] {item13} => {item84} 0.1239 0.2504042 0.4948 1.0587916 1239
## [52] {item92} => {item13} 0.1290 0.4904943 0.2630 0.9912981 1290
## [53] {item13} => {item92} 0.1290 0.2607114 0.4948 0.9912981 1290
## [54] {item21} => {item30} 0.1010 0.3647526 0.2769 1.1026379 1010
## [55] {item30} => {item21} 0.1010 0.3053204 0.3308 1.1026379 1010
## [56] {item21} => {item13} 0.1391 0.5023474 0.2769 1.0152535 1391
## [57] {item13} => {item21} 0.1391 0.2811237 0.4948 1.0152535 1391
## [58] {item58} => {item5} 0.1221 0.4312964 0.2831 1.1659810 1221
## [59] {item5} => {item58} 0.1221 0.3300892 0.3699 1.1659810 1221
## [60] {item58} => {item13} 0.1478 0.5220770 0.2831 1.0551273 1478
## [61] {item13} => {item58} 0.1478 0.2987065 0.4948 1.0551273 1478
## [62] {item10} => {item30} 0.1138 0.3749588 0.3035 1.1334910 1138
## [63] {item30} => {item10} 0.1138 0.3440145 0.3308 1.1334910 1138
## [64] {item10} => {item5} 0.1204 0.3967051 0.3035 1.0724658 1204
## [65] {item5} => {item10} 0.1204 0.3254934 0.3699 1.0724658 1204
## [66] {item10} => {item13} 0.1492 0.4915980 0.3035 0.9935287 1492
## [67] {item13} => {item10} 0.1492 0.3015360 0.4948 0.9935287 1492
## [68] {item30} => {item5} 0.1276 0.3857316 0.3308 1.0427996 1276
## [69] {item5} => {item30} 0.1276 0.3449581 0.3699 1.0427996 1276
## [70] {item30} => {item13} 0.1748 0.5284160 0.3308 1.0679385 1748
## [71] {item13} => {item30} 0.1748 0.3532741 0.4948 1.0679385 1748
## [72] {item5} => {item13} 0.1877 0.5074344 0.3699 1.0255344 1877
## [73] {item13} => {item5} 0.1877 0.3793452 0.4948 1.0255344 1877
# Interactive plotly scatter of the high-support rules:
# support (x) vs lift (y), markers coloured by confidence.
high_support_df <- as(rules_10sup, "data.frame")
marker_spec <- list(
  size = 10,
  color = ~confidence,
  colorscale = "Viridis",
  showscale = TRUE
)
support_lift_fig <- plot_ly(
  data = high_support_df,
  x = ~support,
  y = ~lift,
  text = ~paste("Confidence: ", round(confidence, 2)),
  type = "scatter",
  mode = "markers",
  marker = marker_spec
)
support_lift_fig
Matrix Visualization
# Keep rules with confidence above 0.8.
# NOTE(review): the name "rules_10conf" suggests a 0.10 threshold but the
# filter is 0.8 — consider renaming; kept as-is because it is referenced
# by the matrix plot below.
rules_10conf <- subset(rules, confidence > 0.8)
inspect(rules_10conf)
## lhs rhs support confidence coverage lift
## [1] {item55} => {item34} 0.0100 0.8547009 0.0117 7.693077
## [2] {item83} => {item13} 0.0119 0.8439716 0.0141 1.705682
## [3] {item23} => {item13} 0.0292 0.8613569 0.0339 1.740818
## [4] {item10, item44} => {item13} 0.0101 0.8487395 0.0119 1.715318
## [5] {item20, item23} => {item13} 0.0114 0.9120000 0.0125 1.843169
## [6] {item23, item5} => {item13} 0.0105 0.8400000 0.0125 1.697656
## [7] {item49, item56} => {item15} 0.0101 0.9528302 0.0106 9.153028
## [8] {item15, item49} => {item56} 0.0101 0.8632479 0.0117 14.883584
## [9] {item49, item56} => {item84} 0.0100 0.9433962 0.0106 3.988990
## [10] {item49, item56} => {item30} 0.0105 0.9905660 0.0106 2.994456
## [11] {item15, item49} => {item84} 0.0102 0.8717949 0.0117 3.686236
## [12] {item15, item49} => {item30} 0.0105 0.8974359 0.0117 2.712926
## [13] {item82, item99} => {item5} 0.0150 0.8333333 0.0180 2.252861
## [14] {item82, item99} => {item13} 0.0154 0.8555556 0.0180 1.729094
## [15] {item15, item49, item56} => {item30} 0.0101 1.0000000 0.0101 3.022975
## [16] {item30, item49, item56} => {item15} 0.0101 0.9619048 0.0105 9.240199
## [17] {item15, item30, item49} => {item56} 0.0101 0.9619048 0.0105 16.584565
## [18] {item49, item56, item84} => {item30} 0.0100 1.0000000 0.0100 3.022975
## [19] {item30, item49, item56} => {item84} 0.0100 0.9523810 0.0105 4.026981
## [20] {item15, item49, item84} => {item30} 0.0100 0.9803922 0.0102 2.963701
## [21] {item15, item30, item49} => {item84} 0.0100 0.9523810 0.0105 4.026981
## [22] {item49, item77, item84} => {item30} 0.0101 0.9266055 0.0109 2.801105
## [23] {item30, item49, item84} => {item77} 0.0101 0.8080000 0.0125 4.651698
## [24] {item5, item82, item99} => {item13} 0.0134 0.8933333 0.0150 1.805443
## [25] {item13, item82, item99} => {item5} 0.0134 0.8701299 0.0154 2.352338
## [26] {item15, item56, item77} => {item30} 0.0100 0.9523810 0.0105 2.879023
## [27] {item30, item56, item77} => {item15} 0.0100 0.8196721 0.0122 7.873892
## [28] {item15, item56, item84} => {item30} 0.0106 0.9298246 0.0114 2.810836
## [29] {item15, item30, item56} => {item84} 0.0106 0.8091603 0.0131 3.421397
## [30] {item22, item3, item41} => {item10} 0.0118 0.8550725 0.0138 2.817372
## [31] {item10, item22, item41} => {item3} 0.0118 0.8082192 0.0146 3.789119
## [32] {item25, item34, item77} => {item5} 0.0103 0.8583333 0.0120 2.320447
## [33] {item16, item34, item77} => {item5} 0.0102 0.9026549 0.0113 2.440267
## [34] {item20, item25, item41} => {item92} 0.0100 0.8064516 0.0124 3.066356
## [35] {item16, item25, item77} => {item5} 0.0104 0.8062016 0.0129 2.179512
## [36] {item16, item61, item77} => {item5} 0.0108 0.9230769 0.0117 2.495477
## [37] {item30, item95, item96} => {item13} 0.0118 0.8027211 0.0147 1.622314
## [38] {item3, item84, item95} => {item13} 0.0108 0.8780488 0.0123 1.774553
## count
## [1] 100
## [2] 119
## [3] 292
## [4] 101
## [5] 114
## [6] 105
## [7] 101
## [8] 101
## [9] 100
## [10] 105
## [11] 102
## [12] 105
## [13] 150
## [14] 154
## [15] 101
## [16] 101
## [17] 101
## [18] 100
## [19] 100
## [20] 100
## [21] 100
## [22] 101
## [23] 101
## [24] 134
## [25] 134
## [26] 100
## [27] 100
## [28] 106
## [29] 106
## [30] 118
## [31] 118
## [32] 103
## [33] 102
## [34] 100
## [35] 104
## [36] 108
## [37] 118
## [38] 108
# Matrix view of the high-confidence rules: antecedent (LHS) itemsets vs
# consequent (RHS) itemsets, cells coloured by lift.
# reorder = "none" keeps the rules in their original order.
plot(rules_10conf, method = "matrix", measure = "lift", control = list(reorder = "none"))
## Itemsets in Antecedent (LHS)
## [1] "{item55}" "{item83}" "{item23}"
## [4] "{item10,item44}" "{item20,item23}" "{item23,item5}"
## [7] "{item49,item56}" "{item15,item49}" "{item82,item99}"
## [10] "{item15,item49,item56}" "{item30,item49,item56}" "{item15,item30,item49}"
## [13] "{item49,item56,item84}" "{item15,item49,item84}" "{item49,item77,item84}"
## [16] "{item30,item49,item84}" "{item5,item82,item99}" "{item13,item82,item99}"
## [19] "{item15,item56,item77}" "{item30,item56,item77}" "{item15,item56,item84}"
## [22] "{item15,item30,item56}" "{item22,item3,item41}" "{item10,item22,item41}"
## [25] "{item25,item34,item77}" "{item16,item34,item77}" "{item20,item25,item41}"
## [28] "{item16,item25,item77}" "{item16,item61,item77}" "{item30,item95,item96}"
## [31] "{item3,item84,item95}"
## Itemsets in Consequent (RHS)
## [1] "{item92}" "{item3}" "{item10}" "{item77}" "{item5}" "{item30}"
## [7] "{item84}" "{item56}" "{item15}" "{item13}" "{item34}"

Graph Visualization
# Graph visualization of the 3 rules with the highest lift.
# Fixed: the original used sort(...)[0:3]; in R, 0:3 is c(0, 1, 2, 3) and
# index 0 is silently dropped, so it only worked by accident — head(n = 3)
# states the intent directly. Also renamed: the old name "rules_3conf"
# wrongly implied a confidence-based selection.
rules_top_lift <- head(sort(rules, by = "lift"), n = 3)
plot(rules_top_lift, method = "graph", engine = "igraph")

Splitting Data and Running Algorithm Again
# Split transactions into train/test sets and re-mine rules on training data.
# The 80/20 split is derived from the data size rather than hard-coded
# 1:8000 / 8001:10000, so the code still works if the transaction count
# changes (for n = 10000 this reproduces the original indices exactly).
n_trans <- length(transactions)
n_train <- floor(0.8 * n_trans)
train_transactions <- transactions[seq_len(n_train)]
test_transactions <- transactions[(n_train + 1):n_trans]
# Form rules on training data (min support 1%, min confidence 80%)
rules_train <- apriori(train_transactions, parameter = list(supp = 0.01, conf = 0.8))
## Apriori
##
## Parameter specification:
## confidence minval smax arem aval originalSupport maxtime support minlen
## 0.8 0.1 1 none FALSE TRUE 5 0.01 1
## maxlen target ext
## 10 rules TRUE
##
## Algorithmic control:
## filter tree heap memopt load sort verbose
## 0.1 TRUE TRUE FALSE TRUE 2 TRUE
##
## Absolute minimum support count: 80
##
## set item appearances ...[0 item(s)] done [0.00s].
## set transactions ...[98 item(s), 8000 transaction(s)] done [0.00s].
## sorting and recoding items ... [89 item(s)] done [0.00s].
## creating transaction tree ... done [0.00s].
## checking subsets of size 1 2 3 4 5 done [0.01s].
## writing ... [50 rule(s)] done [0.00s].
## creating S4 object ... done [0.00s].
# Keep training rules with lift > 3, i.e. item co-occurrence well above
# what independence would predict.
rules_train_select <- subset(rules_train, lift > 3)
inspect(rules_train_select)
## lhs rhs support confidence coverage
## [1] {item55} => {item34} 0.010125 0.8526316 0.011875
## [2] {item49, item56} => {item15} 0.010375 0.9540230 0.010875
## [3] {item15, item49} => {item56} 0.010375 0.8829787 0.011750
## [4] {item49, item56} => {item84} 0.010250 0.9425287 0.010875
## [5] {item15, item49} => {item84} 0.010500 0.8936170 0.011750
## [6] {item15, item49, item56} => {item84} 0.010000 0.9638554 0.010375
## [7] {item49, item56, item84} => {item15} 0.010000 0.9756098 0.010250
## [8] {item15, item49, item84} => {item56} 0.010000 0.9523810 0.010500
## [9] {item15, item56, item84} => {item49} 0.010000 0.8602151 0.011625
## [10] {item15, item49, item56} => {item30} 0.010375 1.0000000 0.010375
## [11] {item30, item49, item56} => {item15} 0.010375 0.9651163 0.010750
## [12] {item15, item30, item49} => {item56} 0.010375 0.9651163 0.010750
## [13] {item49, item56, item84} => {item30} 0.010250 1.0000000 0.010250
## [14] {item30, item49, item56} => {item84} 0.010250 0.9534884 0.010750
## [15] {item15, item30, item49} => {item84} 0.010250 0.9534884 0.010750
## [16] {item30, item56, item77} => {item15} 0.010250 0.8282828 0.012375
## [17] {item30, item56, item84} => {item15} 0.010625 0.8173077 0.013000
## [18] {item30, item56, item77} => {item84} 0.010000 0.8080808 0.012375
## [19] {item20, item25, item41} => {item92} 0.010625 0.8333333 0.012750
## [20] {item25, item41, item92} => {item20} 0.010625 0.8252427 0.012875
## [21] {item15, item49, item56, item84} => {item30} 0.010000 1.0000000 0.010000
## [22] {item15, item30, item49, item56} => {item84} 0.010000 0.9638554 0.010375
## [23] {item30, item49, item56, item84} => {item15} 0.010000 0.9756098 0.010250
## [24] {item15, item30, item49, item84} => {item56} 0.010000 0.9756098 0.010250
## [25] {item15, item30, item56, item84} => {item49} 0.010000 0.9411765 0.010625
## lift count
## [1] 7.698705 81
## [2] 9.251132 83
## [3] 15.456958 83
## [4] 4.034366 82
## [5] 3.825006 84
## [6] 4.125652 80
## [7] 9.460458 80
## [8] 16.671877 80
## [9] 21.305636 80
## [10] 3.021148 83
## [11] 9.358703 83
## [12] 16.894815 83
## [13] 3.021148 82
## [14] 4.081277 82
## [15] 4.081277 82
## [16] 8.031833 82
## [17] 7.925408 85
## [18] 3.458880 80
## [19] 3.152088 85
## [20] 4.457759 85
## [21] 3.021148 80
## [22] 4.125652 80
## [23] 9.460458 80
## [24] 17.078508 80
## [25] 23.310872 80
# Evaluate the selected training rules on the held-out test transactions.
# reuse = FALSE forces support/confidence/lift/count to be recomputed from
# test_transactions instead of being copied from the mined rule objects.
rules_test <- interestMeasure(rules_train_select, transactions = test_transactions, measure = c("support", "confidence", "lift", "count"), reuse = FALSE)
rules_train_data <- as(rules_train_select, "data.frame")
# Compare training vs testing metrics rule by rule.
# Fixed: 1:length(x) yields c(1, 0) when x is empty; seq_len() is safe.
# Output strings are unchanged so the transcript below remains accurate.
for (i in seq_len(length(rules_train_select))) {
print(paste("Rule:", rules_train_data$rules[i]))
print(paste(" Train: Conf: ", round(rules_train_data$confidence[i], digits = 2), " Lift: ", round(rules_train_data$lift[i], digits = 2)))
print(paste(" Test: Conf: ", round(rules_test$confidence[i], digits = 2), " Lift: ", round(rules_test$lift[i], digits = 2)))
}
## [1] "Rule: {item55} => {item34}"
## [1] " Train: Conf: 0.85 Lift: 7.7"
## [1] " Test: Conf: 0.86 Lift: 7.68"
## [1] "Rule: {item49,item56} => {item15}"
## [1] " Train: Conf: 0.95 Lift: 9.25"
## [1] " Test: Conf: 0.95 Lift: 8.77"
## [1] "Rule: {item15,item49} => {item56}"
## [1] " Train: Conf: 0.88 Lift: 15.46"
## [1] " Test: Conf: 0.78 Lift: 12.73"
## [1] "Rule: {item49,item56} => {item84}"
## [1] " Train: Conf: 0.94 Lift: 4.03"
## [1] " Test: Conf: 0.95 Lift: 3.82"
## [1] "Rule: {item15,item49} => {item84}"
## [1] " Train: Conf: 0.89 Lift: 3.83"
## [1] " Test: Conf: 0.78 Lift: 3.16"
## [1] "Rule: {item15,item49,item56} => {item84}"
## [1] " Train: Conf: 0.96 Lift: 4.13"
## [1] " Test: Conf: 1 Lift: 4.03"
## [1] "Rule: {item49,item56,item84} => {item15}"
## [1] " Train: Conf: 0.98 Lift: 9.46"
## [1] " Test: Conf: 1 Lift: 9.26"
## [1] "Rule: {item15,item49,item84} => {item56}"
## [1] " Train: Conf: 0.95 Lift: 16.67"
## [1] " Test: Conf: 1 Lift: 16.26"
## [1] "Rule: {item15,item56,item84} => {item49}"
## [1] " Train: Conf: 0.86 Lift: 21.31"
## [1] " Test: Conf: 0.86 Lift: 23.17"
## [1] "Rule: {item15,item49,item56} => {item30}"
## [1] " Train: Conf: 1 Lift: 3.02"
## [1] " Test: Conf: 1 Lift: 3.03"
## [1] "Rule: {item30,item49,item56} => {item15}"
## [1] " Train: Conf: 0.97 Lift: 9.36"
## [1] " Test: Conf: 0.95 Lift: 8.77"
## [1] "Rule: {item15,item30,item49} => {item56}"
## [1] " Train: Conf: 0.97 Lift: 16.89"
## [1] " Test: Conf: 0.95 Lift: 15.4"
## [1] "Rule: {item49,item56,item84} => {item30}"
## [1] " Train: Conf: 1 Lift: 3.02"
## [1] " Test: Conf: 1 Lift: 3.03"
## [1] "Rule: {item30,item49,item56} => {item84}"
## [1] " Train: Conf: 0.95 Lift: 4.08"
## [1] " Test: Conf: 0.95 Lift: 3.82"
## [1] "Rule: {item15,item30,item49} => {item84}"
## [1] " Train: Conf: 0.95 Lift: 4.08"
## [1] " Test: Conf: 0.95 Lift: 3.82"
## [1] "Rule: {item30,item56,item77} => {item15}"
## [1] " Train: Conf: 0.83 Lift: 8.03"
## [1] " Test: Conf: 0.78 Lift: 7.25"
## [1] "Rule: {item30,item56,item84} => {item15}"
## [1] " Train: Conf: 0.82 Lift: 7.93"
## [1] " Test: Conf: 0.68 Lift: 6.27"
## [1] "Rule: {item30,item56,item77} => {item84}"
## [1] " Train: Conf: 0.81 Lift: 3.46"
## [1] " Test: Conf: 0.83 Lift: 3.33"
## [1] "Rule: {item20,item25,item41} => {item92}"
## [1] " Train: Conf: 0.83 Lift: 3.15"
## [1] " Test: Conf: 0.68 Lift: 2.65"
## [1] "Rule: {item25,item41,item92} => {item20}"
## [1] " Train: Conf: 0.83 Lift: 4.46"
## [1] " Test: Conf: 0.58 Lift: 3.17"
## [1] "Rule: {item15,item49,item56,item84} => {item30}"
## [1] " Train: Conf: 1 Lift: 3.02"
## [1] " Test: Conf: 1 Lift: 3.03"
## [1] "Rule: {item15,item30,item49,item56} => {item84}"
## [1] " Train: Conf: 0.96 Lift: 4.13"
## [1] " Test: Conf: 1 Lift: 4.03"
## [1] "Rule: {item30,item49,item56,item84} => {item15}"
## [1] " Train: Conf: 0.98 Lift: 9.46"
## [1] " Test: Conf: 1 Lift: 9.26"
## [1] "Rule: {item15,item30,item49,item84} => {item56}"
## [1] " Train: Conf: 0.98 Lift: 17.08"
## [1] " Test: Conf: 1 Lift: 16.26"
## [1] "Rule: {item15,item30,item56,item84} => {item49}"
## [1] " Train: Conf: 0.94 Lift: 23.31"
## [1] " Test: Conf: 0.86 Lift: 23.17"